bitkeeper revision 1.1689 (42a58901_lkUvZPbAZcV8H9a9NNmtg)
author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 7 Jun 2005 11:46:09 +0000 (11:46 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 7 Jun 2005 11:46:09 +0000 (11:46 +0000)
Clean up the domain_page.h interfaces. There is now a single common
header file, <xen/domain_page.h>, and map_domain_mem() is renamed to
map_domain_page(), which takes a pfn rather than a paddr.
Signed-off-by: Keir Fraser <keir@xensource.com>
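As an illustration of the interface change (a minimal sketch with a hypothetical helper, not taken from the patch), callers stop shifting a pfn into a physical address and pass the pfn directly:

    /* Hypothetical helper -- mirrors the pattern changed throughout this patch. */
    #include <xen/domain_page.h>

    static void zero_frame(unsigned long pfn)
    {
        void *p;

        /* Old interface: p = map_domain_mem(pfn << PAGE_SHIFT); */
        p = map_domain_page(pfn);
        memset(p, 0, PAGE_SIZE);
        /* Old interface: unmap_domain_mem(p); */
        unmap_domain_page(p);
    }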
26 files changed:
.rootkeys
xen/arch/ia64/dom0_ops.c
xen/arch/ia64/pdb-stub.c
xen/arch/x86/audit.c
xen/arch/x86/dom0_ops.c
xen/arch/x86/mm.c
xen/arch/x86/setup.c
xen/arch/x86/shadow.c
xen/arch/x86/traps.c
xen/arch/x86/vmx.c
xen/arch/x86/vmx_platform.c
xen/arch/x86/vmx_vmcs.c
xen/arch/x86/x86_32/domain_page.c
xen/arch/x86/x86_32/mm.c
xen/common/dom0_ops.c
xen/common/dom_mem_ops.c
xen/common/domain.c
xen/common/page_alloc.c
xen/include/asm-ia64/domain_page.h [deleted file]
xen/include/asm-x86/config.h
xen/include/asm-x86/domain_page.h [deleted file]
xen/include/asm-x86/shadow.h
xen/include/asm-x86/x86_32/domain_page.h [deleted file]
xen/include/asm-x86/x86_64/domain_page.h [deleted file]
xen/include/xen/domain_page.h [new file with mode: 0644]
xen/include/xen/perfc_defn.h

index 25ea25e4413fcce1feb7bc09ee5133554fa5aa27..1fb74503062e210c48398edbd82b04bbd848d727 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
 421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
 421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
-4241e879ry316Y_teC18DuK7mGKaQw xen/include/asm-ia64/domain_page.h
 4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h
 421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
 421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
 3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h
 40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
 4204e7acwzqgXyTAPKa1nM-L7Ec0Qw xen/include/asm-x86/domain.h
-41febc4bBKTKHhnAu_KPYwgNkHjFlg xen/include/asm-x86/domain_page.h
 41d3eaaeIBzW621S1oa0c2yk7X43qQ xen/include/asm-x86/e820.h
 3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
 3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
 420951dcqyUCe_gXA_XJPu1ix_poKg xen/include/asm-x86/vmx_virpit.h
 41c0c412lQ0NVVN9PsOSznQ-qhOiPA xen/include/asm-x86/vmx_vmcs.h
 418fbcfe_WliJPToeVM-9VStvym-hw xen/include/asm-x86/x86_32/asm_defns.h
-3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/x86_32/domain_page.h
 429c852fi3pvfa9kIjryYK5AGBmXAg xen/include/asm-x86/x86_32/page-2level.h
 429c852fskvSOgcD5EC25_m9um9t4g xen/include/asm-x86/x86_32/page-3level.h
 4208e2a3ZNFroNXbX9OYaOB-xtUyDQ xen/include/asm-x86/x86_32/page.h
 3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/regs.h
 3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
 41bf1717bML6GxpclTWJabiaO5W5vg xen/include/asm-x86/x86_64/asm_defns.h
-41febc4b1aCGLsm0Y0b_82h7lFtrEA xen/include/asm-x86/x86_64/domain_page.h
 4208e2a3Fktw4ZttKdDxbhvTQ6brfQ xen/include/asm-x86/x86_64/page.h
 404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/regs.h
 404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
 3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xen/delay.h
 4294b5efxcDdUVp4XMEE__IFw7nPow xen/include/xen/dmi.h
 40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/xen/domain.h
+3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/xen/domain_page.h
 3ddb79c2O729EttZTYu1c8LcsUO_GQ xen/include/xen/elf.h
 3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xen/errno.h
 3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen/include/xen/event.h
index daaa87445c533a586c1575ab1603bab31340dfca..e0b48080bca7d3fcd0f615918f59137673eef72c 100644 (file)
--- a/xen/arch/ia64/dom0_ops.c
+++ b/xen/arch/ia64/dom0_ops.c
 #include <public/dom0_ops.h>
 #include <xen/sched.h>
 #include <xen/event.h>
-#include <asm/domain_page.h>
-//#include <asm/msr.h>
 #include <asm/pdb.h>
 #include <xen/trace.h>
 #include <xen/console.h>
-//#include <xen/shadow.h>
 #include <public/sched_ctl.h>
 
 #define TRC_DOM0OP_ENTER_BASE  0x00020000
index 8c1ea5d06b22da602e5db5e60e97e265f38a79ed..49c81313124d7439b3c11f54835d063d1162a410 100644 (file)
--- a/xen/arch/ia64/pdb-stub.c
+++ b/xen/arch/ia64/pdb-stub.c
@@ -14,8 +14,6 @@
 #include <xen/sched.h>
 #include <asm/ptrace.h>
 #include <xen/keyhandler.h> 
-//#include <asm/apic.h>
-#include <asm/domain_page.h>                           /* [un]map_domain_mem */
 #include <asm/processor.h>
 #include <asm/pdb.h>
 #include <xen/list.h>
index ef58f52eff5da68020653c017b29612b09597309..dc2a14979a7d406c8275caac5ea734054224d993 100644 (file)
--- a/xen/arch/x86/audit.c
+++ b/xen/arch/x86/audit.c
@@ -122,7 +122,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
 
     void adjust_l2_page(unsigned long mfn, int shadow)
     {
-        unsigned long *pt = map_domain_mem(mfn << PAGE_SHIFT);
+        unsigned long *pt = map_domain_page(mfn);
         int i;
 
         for ( i = 0; i < l2limit; i++ )
@@ -205,12 +205,12 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
             adjust(hl2page, 0);
         }
 
-        unmap_domain_mem(pt);
+        unmap_domain_page(pt);
     }
 
     void adjust_hl2_page(unsigned long hl2mfn)
     {
-        unsigned long *pt = map_domain_mem(hl2mfn << PAGE_SHIFT);
+        unsigned long *pt = map_domain_page(hl2mfn);
         int i;
 
         for ( i = 0; i < l2limit; i++ )
@@ -251,12 +251,12 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
             }
         }
 
-        unmap_domain_mem(pt);
+        unmap_domain_page(pt);
     }
 
     void adjust_l1_page(unsigned long l1mfn)
     {
-        unsigned long *pt = map_domain_mem(l1mfn << PAGE_SHIFT);
+        unsigned long *pt = map_domain_page(l1mfn);
         int i;
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
@@ -323,7 +323,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
             }
         }
 
-        unmap_domain_mem(pt);
+        unmap_domain_page(pt);
     }
 
     void adjust_shadow_tables()
@@ -615,7 +615,7 @@ void _audit_domain(struct domain *d, int flags)
                              unsigned long mfn)
     {
         struct pfn_info *page = &frame_table[mfn];
-        unsigned long *pt = map_domain_mem(mfn<<PAGE_SHIFT);
+        unsigned long *pt = map_domain_page(mfn);
         int i;
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
@@ -627,7 +627,7 @@ void _audit_domain(struct domain *d, int flags)
                        page->count_info, i, pt[i]);
         }
 
-        unmap_domain_mem(pt);           
+        unmap_domain_page(pt);           
     }
 
     void scan_for_pfn_in_grant_table(struct domain *d, unsigned xmfn)
index 87b4cb9df77b2fb727626e87e083026539d81a6c..dbb723090dcc344535cdf0a4d9dc75cc6771e175 100644 (file)
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -13,7 +13,7 @@
 #include <public/dom0_ops.h>
 #include <xen/sched.h>
 #include <xen/event.h>
-#include <asm/domain_page.h>
+#include <xen/domain_page.h>
 #include <asm/msr.h>
 #include <xen/trace.h>
 #include <xen/console.h>
index 797b89ed0c3383bb03b16dae0deaa7ca05d5ba0f..00ba933799ab8ffbc8106029affe288e1181d962 100644 (file)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
 #include <xen/perfc.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
+#include <xen/domain_page.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
-#include <asm/domain_page.h>
 #include <asm/ldt.h>
 #include <asm/x86_emulate.h>
 
@@ -269,17 +269,17 @@ static int alloc_segdesc_page(struct pfn_info *page)
     struct desc_struct *descs;
     int i;
 
-    descs = map_domain_mem((page-frame_table) << PAGE_SHIFT);
+    descs = map_domain_page(page_to_pfn(page));
 
     for ( i = 0; i < 512; i++ )
         if ( unlikely(!check_descriptor(&descs[i])) )
             goto fail;
 
-    unmap_domain_mem(descs);
+    unmap_domain_page(descs);
     return 1;
 
  fail:
-    unmap_domain_mem(descs);
+    unmap_domain_page(descs);
     return 0;
 }
 
@@ -665,14 +665,14 @@ static int alloc_l1_table(struct pfn_info *page)
 
     ASSERT(!shadow_mode_refcounts(d));
 
-    pl1e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl1e = map_domain_page(pfn);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l1_slot(i) &&
              unlikely(!get_page_from_l1e(pl1e[i], d)) )
             goto fail;
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
     return 1;
 
  fail:
@@ -680,7 +680,7 @@ static int alloc_l1_table(struct pfn_info *page)
         if ( is_guest_l1_slot(i) )
             put_page_from_l1e(pl1e[i], d);
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
     return 0;
 }
 
@@ -699,7 +699,7 @@ static inline int fixup_pae_linear_mappings(l3_pgentry_t *pl3e)
         return 0;
     }
 
-    pl2e = map_domain_mem(l3e_get_paddr(pl3e[3]));
+    pl2e = map_domain_page(l3e_get_pfn(pl3e[3]));
     for (i = 0; i < 4; i++) {
         vaddr = LINEAR_PT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
         idx = (vaddr >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
@@ -709,7 +709,7 @@ static inline int fixup_pae_linear_mappings(l3_pgentry_t *pl3e)
         } else
             pl2e[idx] = l2e_empty();
     }
-    unmap_domain_mem(pl2e);
+    unmap_domain_page(pl2e);
 
     return 1;
 }
@@ -749,7 +749,7 @@ static int alloc_l2_table(struct pfn_info *page, unsigned int type)
     ASSERT( !shadow_mode_refcounts(d) );
    
     
-    pl2e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl2e = map_domain_page(pfn);
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) {
         vaddr = i << L2_PAGETABLE_SHIFT;
@@ -790,7 +790,7 @@ static int alloc_l2_table(struct pfn_info *page, unsigned int type)
     }
 #endif
 
-    unmap_domain_mem(pl2e);
+    unmap_domain_page(pl2e);
     return 1;
 
  fail:
@@ -798,7 +798,7 @@ static int alloc_l2_table(struct pfn_info *page, unsigned int type)
         if ( is_guest_l2_slot(type, i) )
             put_page_from_l2e(pl2e[i], pfn);
 
-    unmap_domain_mem(pl2e);
+    unmap_domain_page(pl2e);
     return 0;
 }
 
@@ -815,7 +815,7 @@ static int alloc_l3_table(struct pfn_info *page)
 
     ASSERT( !shadow_mode_refcounts(d) );
 
-    pl3e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl3e = map_domain_page(pfn);
     for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ ) {
         vaddr = i << L3_PAGETABLE_SHIFT;
         if ( is_guest_l3_slot(i) &&
@@ -825,7 +825,7 @@ static int alloc_l3_table(struct pfn_info *page)
 
     if (!fixup_pae_linear_mappings(pl3e))
         goto fail;
-    unmap_domain_mem(pl3e);
+    unmap_domain_page(pl3e);
     return 1;
 
  fail:
@@ -833,7 +833,7 @@ static int alloc_l3_table(struct pfn_info *page)
         if ( is_guest_l3_slot(i) )
             put_page_from_l3e(pl3e[i], pfn);
 
-    unmap_domain_mem(pl3e);
+    unmap_domain_page(pl3e);
     return 0;
 }
 
@@ -891,13 +891,13 @@ static void free_l1_table(struct pfn_info *page)
     l1_pgentry_t *pl1e;
     int i;
 
-    pl1e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl1e = map_domain_page(pfn);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l1_slot(i) )
             put_page_from_l1e(pl1e[i], d);
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
 }
 
 
@@ -907,14 +907,14 @@ static void free_l2_table(struct pfn_info *page)
     l2_pgentry_t *pl2e;
     int i;
 
-    pl2e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl2e = map_domain_page(pfn);
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) {
         if ( is_guest_l2_slot(page->u.inuse.type_info, i) )
             put_page_from_l2e(pl2e[i], pfn);
     }
 
-    unmap_domain_mem(pl2e);
+    unmap_domain_page(pl2e);
 }
 
 
@@ -926,13 +926,13 @@ static void free_l3_table(struct pfn_info *page)
     l3_pgentry_t *pl3e;
     int           i;
 
-    pl3e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl3e = map_domain_page(pfn);
 
     for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l3_slot(i) )
             put_page_from_l3e(pl3e[i], pfn);
 
-    unmap_domain_mem(pl3e);
+    unmap_domain_page(pl3e);
 }
 
 #endif
@@ -2011,7 +2011,8 @@ int do_mmu_update(
                 break;
             }
 
-            va = map_domain_mem_with_cache(req.ptr, &mapcache);
+            va = map_domain_page_with_cache(mfn, &mapcache);
+            va = (void *)((unsigned long)va + (req.ptr & ~PAGE_MASK));
             page = &frame_table[mfn];
 
             switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
@@ -2105,7 +2106,7 @@ int do_mmu_update(
                 break;
             }
 
-            unmap_domain_mem_with_cache(va, &mapcache);
+            unmap_domain_page_with_cache(va, &mapcache);
 
             put_page(page);
             break;
@@ -2452,6 +2453,7 @@ long do_update_descriptor(unsigned long pa, u64 desc)
     struct domain *dom = current->domain;
     unsigned long gpfn = pa >> PAGE_SHIFT;
     unsigned long mfn;
+    unsigned int  offset = (pa & ~PAGE_MASK) / sizeof(struct desc_struct);
     struct desc_struct *gdt_pent, d;
     struct pfn_info *page;
     long ret = -EINVAL;
@@ -2460,18 +2462,18 @@ long do_update_descriptor(unsigned long pa, u64 desc)
 
     LOCK_BIGLOCK(dom);
 
-    if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) ) {
-        UNLOCK_BIGLOCK(dom);
-        return -EINVAL;
-    }
-
-    if ( (pa & 7) || (mfn >= max_page) || !check_descriptor(&d) ) {
+    if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) ||
+         ((pa % sizeof(struct desc_struct)) != 0) ||
+         (mfn >= max_page) ||
+         !check_descriptor(&d) )
+    {
         UNLOCK_BIGLOCK(dom);
         return -EINVAL;
     }
 
     page = &frame_table[mfn];
-    if ( unlikely(!get_page(page, dom)) ) {
+    if ( unlikely(!get_page(page, dom)) )
+    {
         UNLOCK_BIGLOCK(dom);
         return -EINVAL;
     }
@@ -2505,9 +2507,9 @@ long do_update_descriptor(unsigned long pa, u64 desc)
     }
 
     /* All is good so make the update. */
-    gdt_pent = map_domain_mem((mfn << PAGE_SHIFT) | (pa & ~PAGE_MASK));
-    memcpy(gdt_pent, &d, 8);
-    unmap_domain_mem(gdt_pent);
+    gdt_pent = map_domain_page(mfn);
+    memcpy(&gdt_pent[offset], &d, 8);
+    unmap_domain_page(gdt_pent);
 
     if ( shadow_mode_enabled(dom) )
         shadow_unlock(dom);
@@ -2650,7 +2652,7 @@ void ptwr_flush(struct domain *d, const int which)
 
     pl1e = d->arch.ptwr[which].pl1e;
     modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
     perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
     d->arch.ptwr[which].prev_nr_updates  = modified;
 
@@ -2741,13 +2743,14 @@ static int ptwr_emulated_update(
         return X86EMUL_UNHANDLEABLE;
 
     /* Checked successfully: do the update (write or cmpxchg). */
-    pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
+    pl1e = map_domain_page(page_to_pfn(page));
+    pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
         ol1e = l1e_from_intpte(old);
         if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
         {
-            unmap_domain_mem(pl1e);
+            unmap_domain_page(pl1e);
             put_page_from_l1e(nl1e, d);
             return X86EMUL_CMPXCHG_FAILED;
         }
@@ -2757,7 +2760,7 @@ static int ptwr_emulated_update(
         ol1e  = *pl1e;
         *pl1e = nl1e;
     }
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
 
     /* Finally, drop the old PTE. */
     put_page_from_l1e(ol1e, d);
@@ -2909,7 +2912,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
     }
     
     /* Temporarily map the L1 page, and make a copy of it. */
-    d->arch.ptwr[which].pl1e = map_domain_mem(pfn << PAGE_SHIFT);
+    d->arch.ptwr[which].pl1e = map_domain_page(pfn);
     memcpy(d->arch.ptwr[which].page,
            d->arch.ptwr[which].pl1e,
            L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
@@ -2922,7 +2925,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
         MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
                 &linear_pg_table[addr>>PAGE_SHIFT]);
         /* Toss the writable pagetable state and crash. */
-        unmap_domain_mem(d->arch.ptwr[which].pl1e);
+        unmap_domain_page(d->arch.ptwr[which].pl1e);
         d->arch.ptwr[which].l1va = 0;
         domain_crash();
         return 0;
index c0db9f9c7db251501e5e183b9595aff99a25fd6c..913f3082be757a836355d4347f83807c04be0469 100644 (file)
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
 #include <xen/serial.h>
 #include <xen/trace.h>
 #include <xen/multiboot.h>
+#include <xen/domain_page.h>
 #include <asm/bitops.h>
 #include <asm/smp.h>
 #include <asm/processor.h>
 #include <asm/mpspec.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
-#include <asm/domain_page.h>
 #include <asm/shadow.h>
 #include <asm/e820.h>
 
index 60683518d60c65200ff2728c8583ac04937fd0fa..bac8f23ad63cadec316a9749df85321e62b09adf 100644 (file)
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -23,8 +23,8 @@
 #include <xen/config.h>
 #include <xen/types.h>
 #include <xen/mm.h>
+#include <xen/domain_page.h>
 #include <asm/shadow.h>
-#include <asm/domain_page.h>
 #include <asm/page.h>
 #include <xen/event.h>
 #include <xen/sched.h>
@@ -222,9 +222,9 @@ alloc_shadow_page(struct domain *d,
         else
         {
             page = alloc_domheap_page(NULL);
-            void *l1 = map_domain_mem(page_to_phys(page));
+            void *l1 = map_domain_page(page_to_pfn(page));
             memset(l1, 0, PAGE_SIZE);
-            unmap_domain_mem(l1);
+            unmap_domain_page(l1);
         }
     }
     else
@@ -315,7 +315,7 @@ alloc_shadow_page(struct domain *d,
 static void inline
 free_shadow_l1_table(struct domain *d, unsigned long smfn)
 {
-    l1_pgentry_t *pl1e = map_domain_mem(smfn << PAGE_SHIFT);
+    l1_pgentry_t *pl1e = map_domain_page(smfn);
     int i;
     struct pfn_info *spage = pfn_to_page(smfn);
     u32 min_max = spage->tlbflush_timestamp;
@@ -328,13 +328,13 @@ free_shadow_l1_table(struct domain *d, unsigned long smfn)
         pl1e[i] = l1e_empty();
     }
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
 }
 
 static void inline
 free_shadow_hl2_table(struct domain *d, unsigned long smfn)
 {
-    l1_pgentry_t *hl2 = map_domain_mem(smfn << PAGE_SHIFT);
+    l1_pgentry_t *hl2 = map_domain_page(smfn);
     int i, limit;
 
     SH_VVLOG("%s: smfn=%lx freed", __func__, smfn);
@@ -354,13 +354,13 @@ free_shadow_hl2_table(struct domain *d, unsigned long smfn)
             put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
     }
 
-    unmap_domain_mem(hl2);
+    unmap_domain_page(hl2);
 }
 
 static void inline
 free_shadow_l2_table(struct domain *d, unsigned long smfn, unsigned int type)
 {
-    l2_pgentry_t *pl2e = map_domain_mem(smfn << PAGE_SHIFT);
+    l2_pgentry_t *pl2e = map_domain_page(smfn);
     int i, external = shadow_mode_external(d);
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
@@ -376,7 +376,7 @@ free_shadow_l2_table(struct domain *d, unsigned long smfn, unsigned int type)
         put_shadow_ref(l2e_get_pfn(pl2e[l2_table_offset(LINEAR_PT_VIRT_START)]));
     }
 
-    unmap_domain_mem(pl2e);
+    unmap_domain_page(pl2e);
 }
 
 void free_shadow_page(unsigned long smfn)
@@ -689,8 +689,8 @@ static void alloc_monitor_pagetable(struct vcpu *v)
     mmfn_info = alloc_domheap_page(NULL);
     ASSERT(mmfn_info != NULL);
 
-    mmfn = (unsigned long) (mmfn_info - frame_table);
-    mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT);
+    mmfn = page_to_pfn(mmfn_info);
+    mpl2e = (l2_pgentry_t *)map_domain_page(mmfn);
     memset(mpl2e, 0, PAGE_SIZE);
 
 #ifdef __i386__ /* XXX screws x86/64 build */
@@ -749,7 +749,7 @@ void free_monitor_pagetable(struct vcpu *v)
         put_shadow_ref(mfn);
     }
 
-    unmap_domain_mem(mpl2e);
+    unmap_domain_page(mpl2e);
 
     /*
      * Then free monitor_table.
@@ -766,37 +766,37 @@ set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
               struct domain_mmap_cache *l2cache,
               struct domain_mmap_cache *l1cache)
 {
-    unsigned long phystab = pagetable_get_paddr(d->arch.phys_table);
+    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
     l2_pgentry_t *l2, l2e;
     l1_pgentry_t *l1;
     struct pfn_info *l1page;
     unsigned long va = pfn << PAGE_SHIFT;
 
-    ASSERT( phystab );
+    ASSERT(tabpfn != 0);
 
-    l2 = map_domain_mem_with_cache(phystab, l2cache);
+    l2 = map_domain_page_with_cache(tabpfn, l2cache);
     l2e = l2[l2_table_offset(va)];
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
         l1page = alloc_domheap_page(NULL);
         if ( !l1page )
         {
-            unmap_domain_mem_with_cache(l2, l2cache);
+            unmap_domain_page_with_cache(l2, l2cache);
             return 0;
         }
 
-        l1 = map_domain_mem_with_cache(page_to_phys(l1page), l1cache);
+        l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache);
         memset(l1, 0, PAGE_SIZE);
-        unmap_domain_mem_with_cache(l1, l1cache);
+        unmap_domain_page_with_cache(l1, l1cache);
 
         l2e = l2e_from_page(l1page, __PAGE_HYPERVISOR);
         l2[l2_table_offset(va)] = l2e;
     }
-    unmap_domain_mem_with_cache(l2, l2cache);
+    unmap_domain_page_with_cache(l2, l2cache);
 
-    l1 = map_domain_mem_with_cache(l2e_get_paddr(l2e), l1cache);
+    l1 = map_domain_page_with_cache(l2e_get_pfn(l2e), l1cache);
     l1[l1_table_offset(va)] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
-    unmap_domain_mem_with_cache(l1, l1cache);
+    unmap_domain_page_with_cache(l1, l1cache);
 
     return 1;
 }
@@ -818,9 +818,9 @@ alloc_p2m_table(struct domain *d)
     domain_mmap_cache_init(&l2cache);
 
     d->arch.phys_table = mk_pagetable(page_to_phys(l2page));
-    l2 = map_domain_mem_with_cache(page_to_phys(l2page), &l2cache);
+    l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache);
     memset(l2, 0, PAGE_SIZE);
-    unmap_domain_mem_with_cache(l2, &l2cache);
+    unmap_domain_page_with_cache(l2, &l2cache);
 
     list_ent = d->page_list.next;
     while ( list_ent != &d->page_list )
@@ -888,7 +888,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
         if ( v->arch.guest_vtable &&
              (v->arch.guest_vtable != __linear_l2_table) )
         {
-            unmap_domain_mem(v->arch.guest_vtable);
+            unmap_domain_page(v->arch.guest_vtable);
         }
         if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
             v->arch.guest_vtable = __linear_l2_table;
@@ -901,7 +901,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
         if ( v->arch.shadow_vtable &&
              (v->arch.shadow_vtable != __shadow_linear_l2_table) )
         {
-            unmap_domain_mem(v->arch.shadow_vtable);
+            unmap_domain_page(v->arch.shadow_vtable);
         }
         if ( !(mode & SHM_external) )
             v->arch.shadow_vtable = __shadow_linear_l2_table;
@@ -914,7 +914,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
         if ( v->arch.hl2_vtable &&
              (v->arch.hl2_vtable != __linear_hl2_table) )
         {
-            unmap_domain_mem(v->arch.hl2_vtable);
+            unmap_domain_page(v->arch.hl2_vtable);
         }
         if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
             v->arch.hl2_vtable = __linear_hl2_table;
@@ -1073,7 +1073,7 @@ translate_l1pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l1mfn)
     int i;
     l1_pgentry_t *l1;
 
-    l1 = map_domain_mem(l1mfn << PAGE_SHIFT);
+    l1 = map_domain_page(l1mfn);
     for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
     {
         if ( is_guest_l1_slot(i) &&
@@ -1085,7 +1085,7 @@ translate_l1pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l1mfn)
             l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
         }
     }
-    unmap_domain_mem(l1);
+    unmap_domain_page(l1);
 }
 
 // This is not general enough to handle arbitrary pagetables
@@ -1101,7 +1101,7 @@ translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn,
 
     ASSERT(shadow_mode_translate(d) && !shadow_mode_external(d));
 
-    l2 = map_domain_mem(l2mfn << PAGE_SHIFT);
+    l2 = map_domain_page(l2mfn);
     for (i = 0; i < L2_PAGETABLE_ENTRIES; i++)
     {
         if ( is_guest_l2_slot(type, i) &&
@@ -1114,7 +1114,7 @@ translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn,
             translate_l1pgtable(d, p2m, mfn);
         }
     }
-    unmap_domain_mem(l2);
+    unmap_domain_page(l2);
 }
 
 static void free_shadow_ht_entries(struct domain *d)
@@ -1404,24 +1404,23 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
     perfc_incrc(gpfn_to_mfn_foreign);
 
     unsigned long va = gpfn << PAGE_SHIFT;
-    unsigned long phystab = pagetable_get_paddr(d->arch.phys_table);
-    l2_pgentry_t *l2 = map_domain_mem(phystab);
+    unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
+    l2_pgentry_t *l2 = map_domain_page(tabpfn);
     l2_pgentry_t l2e = l2[l2_table_offset(va)];
-    unmap_domain_mem(l2);
+    unmap_domain_page(l2);
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
         printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%" PRIpte "\n",
                d->domain_id, gpfn, l2e_get_intpte(l2e));
         return INVALID_MFN;
     }
-    unsigned long l1tab = l2e_get_paddr(l2e);
-    l1_pgentry_t *l1 = map_domain_mem(l1tab);
+    l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
     l1_pgentry_t l1e = l1[l1_table_offset(va)];
-    unmap_domain_mem(l1);
+    unmap_domain_page(l1);
 
 #if 0
-    printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx phystab=%lx l2e=%lx l1tab=%lx, l1e=%lx\n",
-           d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
+    printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx tabpfn=%lx l2e=%lx l1tab=%lx, l1e=%lx\n",
+           d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, tabpfn, l2e, l1tab, l1e);
 #endif
 
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
@@ -1455,7 +1454,7 @@ shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn,
              gpfn, gmfn, smfn, hl2mfn);
     perfc_incrc(shadow_hl2_table_count);
 
-    hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT);
+    hl2 = map_domain_page(hl2mfn);
 
 #ifdef __i386__
     if ( shadow_mode_external(d) )
@@ -1483,7 +1482,7 @@ shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn,
             l1e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
     }
 
-    unmap_domain_mem(hl2);
+    unmap_domain_page(hl2);
 
     return hl2mfn;
 }
@@ -1510,7 +1509,7 @@ static unsigned long shadow_l2_table(
         BUG(); /* XXX Deal gracefully with failure. */
     }
 
-    spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
+    spl2e = (l2_pgentry_t *)map_domain_page(smfn);
 
     /* Install hypervisor and 2x linear p.t. mapings. */
     if ( (PGT_base_page_table == PGT_l2_page_table) &&
@@ -1565,7 +1564,7 @@ static unsigned long shadow_l2_table(
         memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));        
     }
 
-    unmap_domain_mem(spl2e);
+    unmap_domain_page(spl2e);
 
     SH_VLOG("shadow_l2_table(%lx -> %lx)", gmfn, smfn);
     return smfn;
@@ -1776,11 +1775,11 @@ shadow_make_snapshot(
     min *= sizeof(l1_pgentry_t);
     length *= sizeof(l1_pgentry_t);
 
-    original = map_domain_mem(gmfn << PAGE_SHIFT);
-    snapshot = map_domain_mem(smfn << PAGE_SHIFT);
+    original = map_domain_page(gmfn);
+    snapshot = map_domain_page(smfn);
     memcpy(snapshot + min, original + min, length);
-    unmap_domain_mem(original);
-    unmap_domain_mem(snapshot);
+    unmap_domain_page(original);
+    unmap_domain_page(snapshot);
 
     return smfn;
 }
@@ -1800,9 +1799,9 @@ shadow_free_snapshot(struct domain *d, struct out_of_sync_entry *entry)
     // XXX Need to think about how to protect the domain's
     // information less expensively.
     //
-    snapshot = map_domain_mem(entry->snapshot_mfn << PAGE_SHIFT);
+    snapshot = map_domain_page(entry->snapshot_mfn);
     memset(snapshot, 0, PAGE_SIZE);
-    unmap_domain_mem(snapshot);
+    unmap_domain_page(snapshot);
 
     put_shadow_ref(entry->snapshot_mfn);
 }
@@ -1915,7 +1914,7 @@ static int snapshot_entry_matches(
     if ( !smfn )
         return 0;
 
-    snapshot = map_domain_mem(smfn << PAGE_SHIFT);
+    snapshot = map_domain_page(smfn);
 
     // This could probably be smarter, but this is sufficent for
     // our current needs.
@@ -1923,7 +1922,7 @@ static int snapshot_entry_matches(
     entries_match = !l1e_has_changed(guest_pt[index], snapshot[index],
                                      PAGE_FLAG_MASK);
 
-    unmap_domain_mem(snapshot);
+    unmap_domain_page(snapshot);
 
 #ifdef PERF_COUNTERS
     if ( entries_match )
@@ -2065,7 +2064,7 @@ static u32 remove_all_write_access_in_ptpage(
     unsigned long readonly_gpfn, unsigned long readonly_gmfn,
     u32 max_refs_to_find, unsigned long prediction)
 {
-    l1_pgentry_t *pt = map_domain_mem(pt_mfn << PAGE_SHIFT);
+    l1_pgentry_t *pt = map_domain_page(pt_mfn);
     l1_pgentry_t match;
     unsigned long flags = _PAGE_RW | _PAGE_PRESENT;
     int i;
@@ -2105,7 +2104,7 @@ static u32 remove_all_write_access_in_ptpage(
     {
         perfc_incrc(remove_write_fast_exit);
         increase_writable_pte_prediction(d, readonly_gpfn, prediction);
-        unmap_domain_mem(pt);
+        unmap_domain_page(pt);
         return found;
     }
  
@@ -2115,7 +2114,7 @@ static u32 remove_all_write_access_in_ptpage(
             break;
     }
 
-    unmap_domain_mem(pt);
+    unmap_domain_page(pt);
 
     return found;
 #undef MATCH_ENTRY
@@ -2207,7 +2206,7 @@ int shadow_remove_all_write_access(
 static u32 remove_all_access_in_page(
     struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn)
 {
-    l1_pgentry_t *pl1e = map_domain_mem(l1mfn << PAGE_SHIFT);
+    l1_pgentry_t *pl1e = map_domain_page(l1mfn);
     l1_pgentry_t match;
     unsigned long flags  = _PAGE_PRESENT;
     int i;
@@ -2233,7 +2232,7 @@ static u32 remove_all_access_in_page(
         }
     }
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
 
     return count;
 }
@@ -2321,11 +2320,11 @@ static int resync_all(struct domain *d, u32 stype)
         // Compare guest's new contents to its snapshot, validating
         // and updating its shadow as appropriate.
         //
-        guest    = map_domain_mem(entry->gmfn         << PAGE_SHIFT);
-        snapshot = map_domain_mem(entry->snapshot_mfn << PAGE_SHIFT);
+        guest    = map_domain_page(entry->gmfn);
+        snapshot = map_domain_page(entry->snapshot_mfn);
 
         if ( smfn )
-            shadow = map_domain_mem(smfn << PAGE_SHIFT);
+            shadow = map_domain_page(smfn);
         else
             shadow = NULL;
 
@@ -2466,9 +2465,9 @@ static int resync_all(struct domain *d, u32 stype)
         }
 
         if ( smfn )
-            unmap_domain_mem(shadow);
-        unmap_domain_mem(snapshot);
-        unmap_domain_mem(guest);
+            unmap_domain_page(shadow);
+        unmap_domain_page(snapshot);
+        unmap_domain_page(guest);
 
         if ( unlikely(unshadow) )
         {
@@ -2507,7 +2506,9 @@ void __shadow_sync_all(struct domain *d)
         if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
             continue;
 
-        l1_pgentry_t *ppte = map_domain_mem(entry->writable_pl1e);
+        l1_pgentry_t *ppte = (l1_pgentry_t *)(
+            (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
+            (entry->writable_pl1e & ~PAGE_MASK));
         l1_pgentry_t opte = *ppte;
         l1_pgentry_t npte = opte;
         l1e_remove_flags(npte, _PAGE_RW);
@@ -2518,7 +2519,7 @@ void __shadow_sync_all(struct domain *d)
         *ppte = npte;
         shadow_put_page_from_l1e(opte, d);
 
-        unmap_domain_mem(ppte);
+        unmap_domain_page(ppte);
     }
 
     // XXX mafetter: SMP
@@ -2696,9 +2697,9 @@ void shadow_l1_normal_pt_update(
                  (void *)pa, l1e_get_intpte(gpte));
         l1pte_propagate_from_guest(current->domain, gpte, &spte);
 
-        spl1e = map_domain_mem_with_cache(sl1mfn << PAGE_SHIFT, cache);
+        spl1e = map_domain_page_with_cache(sl1mfn, cache);
         spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = spte;
-        unmap_domain_mem_with_cache(spl1e, cache);
+        unmap_domain_page_with_cache(spl1e, cache);
     }
 
     shadow_unlock(d);
@@ -2719,10 +2720,10 @@ void shadow_l2_normal_pt_update(
     {
         SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%" PRIpte,
                  (void *)pa, l2e_get_intpte(gpde));
-        spl2e = map_domain_mem_with_cache(sl2mfn << PAGE_SHIFT, cache);
+        spl2e = map_domain_page_with_cache(sl2mfn, cache);
         validate_pde_change(d, gpde,
                             &spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)]);
-        unmap_domain_mem_with_cache(spl2e, cache);
+        unmap_domain_page_with_cache(spl2e, cache);
     }
 
     shadow_unlock(d);
@@ -2831,8 +2832,8 @@ void __update_pagetables(struct vcpu *v)
     if ( max_mode & (SHM_enable | SHM_external) )
     {
         if ( likely(v->arch.guest_vtable != NULL) )
-            unmap_domain_mem(v->arch.guest_vtable);
-        v->arch.guest_vtable = map_domain_mem(gmfn << PAGE_SHIFT);
+            unmap_domain_page(v->arch.guest_vtable);
+        v->arch.guest_vtable = map_domain_page(gmfn);
     }
 
     /*
@@ -2855,8 +2856,8 @@ void __update_pagetables(struct vcpu *v)
     if ( max_mode == SHM_external )
     {
         if ( v->arch.shadow_vtable )
-            unmap_domain_mem(v->arch.shadow_vtable);
-        v->arch.shadow_vtable = map_domain_mem(smfn << PAGE_SHIFT);
+            unmap_domain_page(v->arch.shadow_vtable);
+        v->arch.shadow_vtable = map_domain_page(smfn);
     }
 
     /*
@@ -2871,8 +2872,8 @@ void __update_pagetables(struct vcpu *v)
         if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
             hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
         if ( v->arch.hl2_vtable )
-            unmap_domain_mem(v->arch.hl2_vtable);
-        v->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
+            unmap_domain_page(v->arch.hl2_vtable);
+        v->arch.hl2_vtable = map_domain_page(hl2mfn);
     }
 
     /*
@@ -2934,22 +2935,22 @@ mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn)
 
     if ( (smfn = __shadow_status(d, gpfn, PGT_l1_shadow)) )
     {
-        l1e = map_domain_mem(smfn << PAGE_SHIFT);
+        l1e = map_domain_page(smfn);
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             if ( is_guest_l1_slot(i) &&
                  (l1e_get_flags(l1e[i]) & _PAGE_PRESENT) )
                 l1e_add_flags(l1e[i], SHADOW_REFLECTS_SNAPSHOT);
-        unmap_domain_mem(l1e);
+        unmap_domain_page(l1e);
     }
 
     if ( (smfn = __shadow_status(d, gpfn, PGT_l2_shadow)) )
     {
-        l2e = map_domain_mem(smfn << PAGE_SHIFT);
+        l2e = map_domain_page(smfn);
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
             if ( is_guest_l2_slot(0, i) &&
                  (l2e_get_flags(l2e[i]) & _PAGE_PRESENT) )
                 l2e_add_flags(l2e[i], SHADOW_REFLECTS_SNAPSHOT);
-        unmap_domain_mem(l2e);
+        unmap_domain_page(l2e);
     }
 }
 
@@ -3117,21 +3118,21 @@ static int check_l1_table(
     {
         snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
         ASSERT(snapshot_mfn);
-        p_snapshot = map_domain_mem(snapshot_mfn << PAGE_SHIFT);
+        p_snapshot = map_domain_page(snapshot_mfn);
     }
 
-    p_guest  = map_domain_mem(gmfn << PAGE_SHIFT);
-    p_shadow = map_domain_mem(smfn << PAGE_SHIFT);
+    p_guest  = map_domain_page(gmfn);
+    p_shadow = map_domain_page(smfn);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         errors += check_pte(v, p_guest+i, p_shadow+i,
                             p_snapshot ? p_snapshot+i : NULL,
                             1, l2_idx, i);
  
-    unmap_domain_mem(p_shadow);
-    unmap_domain_mem(p_guest);
+    unmap_domain_page(p_shadow);
+    unmap_domain_page(p_guest);
     if ( p_snapshot )
-        unmap_domain_mem(p_snapshot);
+        unmap_domain_page(p_snapshot);
 
     return errors;
 }
@@ -3146,8 +3147,8 @@ int check_l2_table(
     struct vcpu *v, unsigned long gmfn, unsigned long smfn, int oos_pdes)
 {
     struct domain *d = v->domain;
-    l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
-    l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
+    l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_page(gmfn);
+    l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_page(smfn);
     l2_pgentry_t match;
     int i;
     int errors = 0;
@@ -3219,8 +3220,8 @@ int check_l2_table(
                             NULL,
                             2, i, 0);
 
-    unmap_domain_mem(spl2e);
-    unmap_domain_mem(gpl2e);
+    unmap_domain_page(spl2e);
+    unmap_domain_page(gpl2e);
 
 #if 1
     if ( errors )
@@ -3267,8 +3268,8 @@ int _check_pagetable(struct vcpu *v, char *s)
  
     errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);
 
-    gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT );
-    spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT );
+    gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn);
+    spl2e = (l2_pgentry_t *) map_domain_page(smfn);
 
     /* Go back and recurse. */
 #ifdef __i386__
@@ -3292,8 +3293,8 @@ int _check_pagetable(struct vcpu *v, char *s)
         }
     }
 
-    unmap_domain_mem(spl2e);
-    unmap_domain_mem(gpl2e);
+    unmap_domain_page(spl2e);
+    unmap_domain_page(gpl2e);
 
 #if 0
     SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
index 9e9d82d13068b2013d7ac27ac0b054ba61c3ac03..cf7aaa3d198fcbc200a07874103535d440a6e3d1 100644 (file)
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -39,8 +39,8 @@
 #include <xen/irq.h>
 #include <xen/perfc.h>
 #include <xen/softirq.h>
+#include <xen/domain_page.h>
 #include <asm/shadow.h>
-#include <asm/domain_page.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
index 8aa476f3c6a7f2df5ed36d1c0cf41850a81ba8ad..deff128ea14c3a25f712bac6c6c1e58d03c269af 100644 (file)
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
+#include <xen/domain_page.h>
 #include <asm/current.h>
 #include <asm/io.h>
 #include <asm/shadow.h>
@@ -102,7 +103,7 @@ void stop_vmx(void)
 }
 
 /*
- * Not all cases recevie valid value in the VM-exit instruction length field.
+ * Not all cases receive valid value in the VM-exit instruction length field.
  */
 #define __get_instruction_length(len) \
     __vmread(INSTRUCTION_LEN, &(len)); \
@@ -118,8 +119,6 @@ static void inline __update_guest_eip(unsigned long inst_len)
 }
 
 
-#include <asm/domain_page.h>
-
 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
 {
     unsigned long eip;
@@ -468,23 +467,24 @@ enum { COPY_IN = 0, COPY_OUT };
 static inline int
 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
 {
-    unsigned char *addr;
+    char *addr;
     unsigned long mfn;
 
-    if ((size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE) {
+    if ( (size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE )
+    {
        printf("vmx_copy exceeds page boundary\n");
-       return 0;
+        return 0;
     }
 
     mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
-    addr = map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK));
+    addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
 
     if (dir == COPY_IN)
            memcpy(buf, addr, size);
     else
            memcpy(addr, buf, size);
 
-    unmap_domain_mem(addr);
+    unmap_domain_page(addr);
     return 1;
 }
 
index 7d976a42e8cd0cd43aecb2c2dfb39508082c2bdc..96e7572750d990396d77b5a09c84bab71cdaa903 100644 (file)
--- a/xen/arch/x86/vmx_platform.c
+++ b/xen/arch/x86/vmx_platform.c
@@ -21,7 +21,7 @@
 #include <xen/types.h>
 #include <xen/mm.h>
 #include <asm/shadow.h>
-#include <asm/domain_page.h>
+#include <xen/domain_page.h>
 #include <asm/page.h> 
 #include <xen/event.h> 
 #include <xen/trace.h>
@@ -411,43 +411,41 @@ int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
 {
     l1_pgentry_t gpte;
     unsigned long mfn;
-    unsigned long ma;
-    unsigned char * inst_start;
+    unsigned char *inst_start;
     int remaining = 0;
         
-    if (inst_len > MAX_INST_LEN || inst_len <= 0) {
+    if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) )
         return 0;
-    }
 
-    if (vmx_paging_enabled(current)) {
+    if ( vmx_paging_enabled(current) )
+    {
         gpte = gva_to_gpte(guest_eip);
         mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
         /* Does this cross a page boundary ? */
-        if ((guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK)) {
+        if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
+        {
             remaining = (guest_eip + inst_len) & ~PAGE_MASK;
             inst_len -= remaining;
         }
-
-    } else {
+    }
+    else
+    {
         mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
     }
-    ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
-    inst_start = (unsigned char *)map_domain_mem(ma);
-                
-    memcpy((char *)buf, inst_start, inst_len);
-    unmap_domain_mem(inst_start);
 
-    if (remaining) {
+    inst_start = map_domain_page(mfn);
+    memcpy((char *)buf, inst_start + (guest_eip & ~PAGE_MASK), inst_len);
+    unmap_domain_page(inst_start);
+
+    if ( remaining )
+    {
         gpte = gva_to_gpte(guest_eip+inst_len+remaining);
         mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
-
-        ma = (mfn << PAGE_SHIFT);
-        inst_start = (unsigned char *)map_domain_mem(ma);
-                
+        inst_start = map_domain_page(mfn);
         memcpy((char *)buf+inst_len, inst_start, remaining);
-        unmap_domain_mem(inst_start);
-
+        unmap_domain_page(inst_start);
     }
+
     return inst_len+remaining;
 }
 
index fa01316aa2064495977771c05e59f979819f7345..c27daedc48273c4eba06ed7034b7293ef6e18dde 100644 (file)
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -22,7 +22,7 @@
 #include <xen/mm.h>
 #include <xen/lib.h>
 #include <xen/errno.h>
-
+#include <xen/domain_page.h>
 #include <asm/current.h>
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
@@ -31,7 +31,6 @@
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <public/io/ioreq.h>
-#include <asm/domain_page.h>
 
 #ifdef CONFIG_VMX
 
@@ -122,8 +121,9 @@ int vmx_setup_platform(struct vcpu *d, struct cpu_user_regs *regs)
     addr = regs->edi;
     offset = (addr & ~PAGE_MASK);
     addr = round_pgdown(addr);
+
     mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
-    p = map_domain_mem(mpfn << PAGE_SHIFT);
+    p = map_domain_page(mpfn);
 
     e820p = (struct e820entry *) ((unsigned long) p + offset); 
 
@@ -131,28 +131,28 @@ int vmx_setup_platform(struct vcpu *d, struct cpu_user_regs *regs)
     print_e820_memory_map(e820p, n);
 #endif
 
-    for (i = 0; i < n; i++) {
-        if (e820p[i].type == E820_SHARED_PAGE) {
+    for ( i = 0; i < n; i++ )
+    {
+        if ( e820p[i].type == E820_SHARED_PAGE )
+        {
             gpfn = (e820p[i].addr >> PAGE_SHIFT);
             break;
         }
     }
 
-    if (gpfn == 0) {
-        printk("No shared Page ?\n");
-        unmap_domain_mem(p);        
+    if ( gpfn == 0 )
+    {
+        unmap_domain_page(p);        
         return -1;
     }   
-    unmap_domain_mem(p);        
 
-    mpfn = phys_to_machine_mapping(gpfn);
-    p = map_domain_mem(mpfn << PAGE_SHIFT);
-    ASSERT(p != NULL);
+    unmap_domain_page(p);        
 
     /* Initialise shared page */
+    mpfn = phys_to_machine_mapping(gpfn);
+    p = map_domain_page(mpfn);
     memset(p, 0, PAGE_SIZE);
-
-    d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;
+    d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long)p;
 
     return 0;
 }
index c1a0172462849e5619bcf69ffc829108e2c458e4..6bec96bc87945bbd61676d219baa326d1094826a 100644 (file)
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
 #include <xen/sched.h>
 #include <xen/mm.h>
 #include <xen/perfc.h>
+#include <xen/domain_page.h>
 #include <asm/current.h>
-#include <asm/domain_page.h>
 #include <asm/flushtlb.h>
 #include <asm/hardirq.h>
 
+#define MAPCACHE_ORDER    10
+#define MAPCACHE_ENTRIES  (1 << MAPCACHE_ORDER)
+
 l1_pgentry_t *mapcache;
 static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
 static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
 
-void *map_domain_mem(unsigned long pa)
+void *map_domain_page(unsigned long pfn)
 {
     unsigned long va;
     unsigned int idx, cpu = smp_processor_id();
@@ -34,7 +37,7 @@ void *map_domain_mem(unsigned long pa)
 #endif
 
     ASSERT(!in_irq());
-    perfc_incrc(map_domain_mem_count);
+    perfc_incrc(map_domain_page_count);
 
     spin_lock(&map_lock);
 
@@ -58,15 +61,15 @@ void *map_domain_mem(unsigned long pa)
     }
     while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT );
 
-    cache[idx] = l1e_from_paddr(pa & PAGE_MASK, __PAGE_HYPERVISOR);
+    cache[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
 
     spin_unlock(&map_lock);
 
-    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
+    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
     return (void *)va;
 }
 
-void unmap_domain_mem(void *va)
+void unmap_domain_page(void *va)
 {
     unsigned int idx;
     ASSERT((void *)MAPCACHE_VIRT_START <= va);
index 99dd178feabcf21dba349827808d894c5934cb5c..686064baa4efa43b063c6f4c9ca4396cc05601eb 100644 (file)
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
 #include <xen/lib.h>
 #include <xen/init.h>
 #include <xen/mm.h>
+#include <xen/sched.h>
 #include <asm/current.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
 #include <asm/fixmap.h>
-#include <asm/domain_page.h>
+
+extern l1_pgentry_t *mapcache;
 
 unsigned int PAGE_HYPERVISOR         = __PAGE_HYPERVISOR;
 unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
index 4221c18c7e5f839732baf10a7f00f7f6a22c1924..216af3854ad43b69fdec1938b4e045750a55b4e2 100644 (file)
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
 #include <xen/types.h>
 #include <xen/lib.h>
 #include <xen/mm.h>
-#include <public/dom0_ops.h>
 #include <xen/sched.h>
 #include <xen/domain.h>
 #include <xen/event.h>
-#include <asm/domain_page.h>
+#include <xen/domain_page.h>
 #include <xen/trace.h>
 #include <xen/console.h>
-#include <public/sched_ctl.h>
 #include <asm/current.h>
+#include <public/dom0_ops.h>
+#include <public/sched_ctl.h>
 
 extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
 extern void arch_getdomaininfo_ctxt(
index 9e974be44dead5c35d6996a2fe3cfe8dc012c2e7..ad53b91d1c0de0eadceb25cf1f4518f864305ead 100644 (file)
--- a/xen/common/dom_mem_ops.c
+++ b/xen/common/dom_mem_ops.c
@@ -15,7 +15,6 @@
 #include <xen/event.h>
 #include <xen/shadow.h>
 #include <asm/current.h>
-#include <asm/domain_page.h>
 #include <asm/hardirq.h>
 
 /*
index 127c57fe1763bc8fdb1a9ae22c3fea1d9a5f2ca4..5c5214926cb57d09d904b614271e45b379c03355 100644 (file)
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -15,9 +15,9 @@
 #include <xen/time.h>
 #include <xen/console.h>
 #include <xen/softirq.h>
-#include <public/dom0_ops.h>
-#include <asm/domain_page.h>
+#include <xen/domain_page.h>
 #include <asm/debugger.h>
+#include <public/dom0_ops.h>
 
 /* Both these structures are protected by the domlist_lock. */
 rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
index d2524ba009cbdb9aa7740f19b6a8f4433c99ae08..610fbdb020014fb7ec4d9a6b4b33a1a3e82eb51f 100644 (file)
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -31,7 +31,7 @@
 #include <xen/irq.h>
 #include <xen/softirq.h>
 #include <xen/shadow.h>
-#include <asm/domain_page.h>
+#include <xen/domain_page.h>
 #include <asm/page.h>
 
 /*
@@ -383,9 +383,9 @@ void scrub_heap_pages(void)
             }
             else
             {
-                p = map_domain_mem(pfn << PAGE_SHIFT);
+                p = map_domain_page(pfn);
                 clear_page(p);
-                unmap_domain_mem(p);
+                unmap_domain_page(p);
             }
         }
         
@@ -674,9 +674,9 @@ static void page_scrub_softirq(void)
         {
             pg = list_entry(ent, struct pfn_info, list);
             ent = ent->prev;
-            p = map_domain_mem(page_to_phys(pg));
+            p = map_domain_page(page_to_pfn(pg));
             clear_page(p);
-            unmap_domain_mem(p);
+            unmap_domain_page(p);
             free_heap_pages(MEMZONE_DOM, pg, 0);
         }
     } while ( (NOW() - start) < MILLISECS(1) );
diff --git a/xen/include/asm-ia64/domain_page.h b/xen/include/asm-ia64/domain_page.h
deleted file mode 100644 (file)
index d131576..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/******************************************************************************
- * domain_page.h
- * 
- * This is a trivial no-op on ia64, where we can 1:1 map all RAM.
- */
-
-#ifndef __ASM_DOMAIN_PAGE_H__
-#define __ASM_DOMAIN_PAGE_H__
-
-#define map_domain_mem(_pa)   phys_to_virt(_pa)
-#define unmap_domain_mem(_va) ((void)(_va))
-
-#endif /* __ASM_DOMAIN_PAGE_H__ */
-
index 48fe47a9532ee21f93b65af74a7caff5df6bb860..bf196d0b177bd596697141a611b938ba61540004 100644 (file)
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -189,7 +189,8 @@ extern unsigned long _end; /* standard ELF symbol */
 
 #elif defined(__i386__)
 
-#define CONFIG_X86_32 1
+#define CONFIG_X86_32      1
+#define CONFIG_DOMAIN_PAGE 1
 
 #define asmlinkage __attribute__((regparm(0)))
 
@@ -198,7 +199,7 @@ extern unsigned long _end; /* standard ELF symbol */
  *                                                       ------ ------
  *  I/O remapping area                                   ( 4MB)
  *  Direct-map (1:1) area [Xen code/data/heap]           (12MB)
- *  map_domain_mem cache                                 ( 4MB)
+ *  map_domain_page cache                                ( 4MB)
  *  Per-domain mappings                                  ( 4MB)
  *  Shadow linear pagetable                              ( 4MB) ( 8MB)
  *  Guest linear pagetable                               ( 4MB) ( 8MB)
diff --git a/xen/include/asm-x86/domain_page.h b/xen/include/asm-x86/domain_page.h
deleted file mode 100644 (file)
index 3eae539..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef __x86_64__
-#include <asm/x86_64/domain_page.h>
-#else
-#include <asm/x86_32/domain_page.h>
-#endif
index 08c4339f1630ae8b57c0c0208f6175ea0b0b487d..e4788054e09874aac5690753bc8a0d3666a4fab2 100644 (file)
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
 #include <xen/perfc.h>
 #include <xen/sched.h>
 #include <xen/mm.h>
+#include <xen/domain_page.h>
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/processor.h>
-#include <asm/domain_page.h>
-#include <public/dom0_ops.h>
-#ifdef CONFIG_VMX
 #include <asm/vmx.h>
-#endif
+#include <public/dom0_ops.h>
 
 /* Shadow PT operation mode : shadow-mode variable in arch_domain. */
 
diff --git a/xen/include/asm-x86/x86_32/domain_page.h b/xen/include/asm-x86/x86_32/domain_page.h
deleted file mode 100644 (file)
index c8dcb60..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/******************************************************************************
- * domain_page.h
- * 
- * Allow temporary mapping of domain page frames into Xen space.
- */
-
-#ifndef __ASM_DOMAIN_PAGE_H__
-#define __ASM_DOMAIN_PAGE_H__
-
-#include <xen/config.h>
-#include <xen/sched.h>
-
-extern l1_pgentry_t *mapcache;
-#define MAPCACHE_ORDER    10
-#define MAPCACHE_ENTRIES  (1 << MAPCACHE_ORDER)
-
-/*
- * Maps a given physical address, returning corresponding virtual address.
- * The entire page containing that VA is now accessible until a 
- * corresponding call to unmap_domain_mem().
- */
-extern void *map_domain_mem(unsigned long pa);
-
-/*
- * Pass a VA within a page previously mapped with map_domain_mem().
- * That page will then be removed from the mapping lists.
- */
-extern void unmap_domain_mem(void *va);
-
-#define DMCACHE_ENTRY_VALID 1UL
-#define DMCACHE_ENTRY_HELD  2UL
-
-struct domain_mmap_cache {
-    unsigned long pa;
-    void *va;
-};
-
-static inline void
-domain_mmap_cache_init(struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    cache->pa = 0;
-}
-
-static inline void *
-map_domain_mem_with_cache(unsigned long pa, struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    BUG_ON(cache->pa & DMCACHE_ENTRY_HELD);
-
-    if ( likely(cache->pa) )
-    {
-        cache->pa |= DMCACHE_ENTRY_HELD;
-        if ( likely((pa & PAGE_MASK) == (cache->pa & PAGE_MASK)) )
-            goto done;
-        unmap_domain_mem(cache->va);
-    }
-
-    cache->pa = (pa & PAGE_MASK) | DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
-    cache->va = map_domain_mem(cache->pa);
-
- done:
-    return (void *)(((unsigned long)cache->va & PAGE_MASK) |
-                    (pa & ~PAGE_MASK));
-}
-
-static inline void
-unmap_domain_mem_with_cache(void *va, struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    cache->pa &= ~DMCACHE_ENTRY_HELD;
-}
-
-static inline void
-domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    if ( likely(cache->pa) )
-    {
-        unmap_domain_mem(cache->va);
-        cache->pa = 0;
-    }
-}
-
-#endif /* __ASM_DOMAIN_PAGE_H__ */
diff --git a/xen/include/asm-x86/x86_64/domain_page.h b/xen/include/asm-x86/x86_64/domain_page.h
deleted file mode 100644 (file)
index ae0e6d2..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/******************************************************************************
- * domain_page.h
- * 
- * This is a trivial no-op on x86/64, where we can 1:1 map all RAM.
- */
-
-#ifndef __ASM_DOMAIN_PAGE_H__
-#define __ASM_DOMAIN_PAGE_H__
-
-#define map_domain_mem(_pa)   phys_to_virt(_pa)
-#define unmap_domain_mem(_va) ((void)(_va))
-
-struct domain_mmap_cache { 
-};
-
-#define domain_mmap_cache_init(_c)         ((void)(_c))
-#define map_domain_mem_with_cache(_p,_c)   (map_domain_mem(_p))
-#define unmap_domain_mem_with_cache(_v,_c) ((void)(_v))
-#define domain_mmap_cache_destroy(_c)      ((void)(_c))
-
-#endif /* __ASM_DOMAIN_PAGE_H__ */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
new file mode 100644 (file)
index 0000000..f20e3f2
--- /dev/null
@@ -0,0 +1,100 @@
+/******************************************************************************
+ * domain_page.h
+ * 
+ * Allow temporary mapping of domain page frames into Xen space.
+ */
+
+#ifndef __XEN_DOMAIN_PAGE_H__
+#define __XEN_DOMAIN_PAGE_H__
+
+#include <xen/config.h>
+#include <xen/mm.h>
+
+#ifdef CONFIG_DOMAIN_PAGE
+
+/*
+ * Maps a given page frame, returning the mmap'ed virtual address. The page is 
+ * now accessible until a corresponding call to unmap_domain_page().
+ */
+extern void *map_domain_page(unsigned long pfn);
+
+/*
+ * Pass a VA within a page previously mapped with map_domain_page().
+ * That page will then be removed from the mapping lists.
+ */
+extern void unmap_domain_page(void *va);
+
+#define DMCACHE_ENTRY_VALID 1U
+#define DMCACHE_ENTRY_HELD  2U
+
+struct domain_mmap_cache {
+    unsigned long pfn;
+    void         *va;
+    unsigned int  flags;
+};
+
+static inline void
+domain_mmap_cache_init(struct domain_mmap_cache *cache)
+{
+    ASSERT(cache != NULL);
+    cache->flags = 0;
+}
+
+static inline void *
+map_domain_page_with_cache(unsigned long pfn, struct domain_mmap_cache *cache)
+{
+    ASSERT(cache != NULL);
+    BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);
+
+    if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
+    {
+        cache->flags |= DMCACHE_ENTRY_HELD;
+        if ( likely(pfn == cache->pfn) )
+            goto done;
+        unmap_domain_page(cache->va);
+    }
+
+    cache->pfn   = pfn;
+    cache->va    = map_domain_page(pfn);
+    cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
+
+ done:
+    return cache->va;
+}
+
+static inline void
+unmap_domain_page_with_cache(void *va, struct domain_mmap_cache *cache)
+{
+    ASSERT(cache != NULL);
+    cache->flags &= ~DMCACHE_ENTRY_HELD;
+}
+
+static inline void
+domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
+{
+    ASSERT(cache != NULL);
+    BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);
+
+    if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
+    {
+        unmap_domain_page(cache->va);
+        cache->flags = 0;
+    }
+}
+
+#else /* !CONFIG_DOMAIN_PAGE */
+
+#define map_domain_page(pfn)                phys_to_virt((pfn)<<PAGE_SHIFT)
+#define unmap_domain_page(va)               ((void)(va))
+
+struct domain_mmap_cache { 
+};
+
+#define domain_mmap_cache_init(c)           ((void)(c))
+#define map_domain_page_with_cache(pfn,c)   (map_domain_page(pfn))
+#define unmap_domain_page_with_cache(va,c)  ((void)(va))
+#define domain_mmap_cache_destroy(c)        ((void)(c))
+
+#endif /* !CONFIG_DOMAIN_PAGE */
+
+#endif /* __XEN_DOMAIN_PAGE_H__ */
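
Usage note (a hedged sketch, not part of the patch): the cached variants added above avoid re-mapping when consecutive calls hit the same frame, as in the set_p2m_entry() changes earlier in this patch. A hypothetical caller might look like:

    /* Hypothetical example of the domain_mmap_cache pattern. */
    #include <xen/domain_page.h>

    static void zero_frames(unsigned long *pfns, unsigned int nr)
    {
        struct domain_mmap_cache cache;
        unsigned int i;
        void *p;

        domain_mmap_cache_init(&cache);
        for ( i = 0; i < nr; i++ )
        {
            /* Re-uses the previous mapping if pfns[i] repeats. */
            p = map_domain_page_with_cache(pfns[i], &cache);
            memset(p, 0, PAGE_SIZE);
            unmap_domain_page_with_cache(p, &cache);
        }
        domain_mmap_cache_destroy(&cache);
    }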
index c7f2493a1a14f96cafda20cff19ae11ead6b1ab9..abc17e0b68a7367f1fea9d1f650174c0755c748f 100644 (file)
--- a/xen/include/xen/perfc_defn.h
+++ b/xen/include/xen/perfc_defn.h
+#ifndef __XEN_PERFC_DEFN_H__
+#define __XEN_PERFC_DEFN_H__
+
 #define PERFC_MAX_PT_UPDATES 64
 #define PERFC_PT_UPDATES_BUCKET_SIZE 3
-PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
-PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
-PERFCOUNTER_ARRAY( l1_entries_checked, "l1 entries checked", PERFC_MAX_PT_UPDATES )
-PERFCOUNTER_ARRAY( shm_l2_updates, "shadow mode L2 pt updates", PERFC_MAX_PT_UPDATES )
-PERFCOUNTER_ARRAY( shm_hl2_updates, "shadow mode HL2 pt updates", PERFC_MAX_PT_UPDATES )
-PERFCOUNTER_ARRAY( snapshot_copies, "entries copied per snapshot", PERFC_MAX_PT_UPDATES )
-
-PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
-PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
+PERFCOUNTER_ARRAY(wpt_updates,          "writable pt updates",
+                  PERFC_MAX_PT_UPDATES)
+PERFCOUNTER_ARRAY(bpt_updates,          "batched pt updates",
+                  PERFC_MAX_PT_UPDATES)
+PERFCOUNTER_ARRAY(l1_entries_checked,   "l1 entries checked",
+                  PERFC_MAX_PT_UPDATES)
+PERFCOUNTER_ARRAY(shm_l2_updates,       "shadow mode L2 pt updates",
+                  PERFC_MAX_PT_UPDATES)
+PERFCOUNTER_ARRAY(shm_hl2_updates,      "shadow mode HL2 pt updates",
+                  PERFC_MAX_PT_UPDATES)
+PERFCOUNTER_ARRAY(snapshot_copies,      "entries copied per snapshot",
+                  PERFC_MAX_PT_UPDATES)
+
+PERFCOUNTER_ARRAY(hypercalls,           "hypercalls", NR_hypercalls)
+PERFCOUNTER_ARRAY(exceptions,           "exceptions", 32)
 
 #define VMX_PERF_EXIT_REASON_SIZE 37
 #define VMX_PERF_VECTOR_SIZE 0x20
-PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
-PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
-
-PERFCOUNTER_CPU (seg_fixups,   "segmentation fixups" )
-
-PERFCOUNTER_CPU( irqs,         "#interrupts" )
-PERFCOUNTER_CPU( ipis,         "#IPIs" )
-PERFCOUNTER_CPU( irq_time,     "cycles spent in irq handler" )
-
-PERFCOUNTER_CPU( apic_timer,   "apic timer interrupts" )
-PERFCOUNTER_CPU( ac_timer_max, "ac_timer max error (ns)" )
-PERFCOUNTER_CPU( sched_irq,    "sched: timer" )
-PERFCOUNTER_CPU( sched_run,    "sched: runs through scheduler" )
-PERFCOUNTER_CPU( sched_ctx,    "sched: context switches" )
-
-PERFCOUNTER_CPU( domain_page_tlb_flush, "domain page tlb flushes" )
-PERFCOUNTER_CPU( need_flush_tlb_flush, "PG_need_flush tlb flushes" )
-
-PERFCOUNTER_CPU( calls_to_mmu_update, "calls_to_mmu_update" )
-PERFCOUNTER_CPU( num_page_updates, "num_page_updates" )
-PERFCOUNTER_CPU( calls_to_update_va, "calls_to_update_va_map" )
-PERFCOUNTER_CPU( page_faults, "page faults" )
-PERFCOUNTER_CPU( copy_user_faults, "copy_user faults" )
-
-PERFCOUNTER_CPU(shadow_fault_calls,                "calls to shadow_fault")
-PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present, "sf bailed due to pde not present")
-PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
-PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping,      "sf bailed due to a ro mapping")
-PERFCOUNTER_CPU(shadow_fault_fixed,                "sf fixed the pgfault")
-PERFCOUNTER_CPU(write_fault_bail,                  "sf bailed due to write_fault")
-PERFCOUNTER_CPU(read_fault_bail,                   "sf bailed due to read_fault")
-
-PERFCOUNTER_CPU( map_domain_mem_count, "map_domain_mem count" )
-PERFCOUNTER_CPU( ptwr_emulations, "writable pt emulations" )
-
-PERFCOUNTER_CPU( shadow_l2_table_count,  "shadow_l2_table count" )
-PERFCOUNTER_CPU( shadow_l1_table_count,  "shadow_l1_table count" )
-PERFCOUNTER_CPU( unshadow_table_count,   "unshadow_table count" )
-PERFCOUNTER_CPU( shadow_fixup_count,     "shadow_fixup count" )
-PERFCOUNTER_CPU( shadow_update_va_fail1, "shadow_update_va_fail1" )
-PERFCOUNTER_CPU( shadow_update_va_fail2, "shadow_update_va_fail2" )
+PERFCOUNTER_ARRAY(vmexits,              "vmexits", VMX_PERF_EXIT_REASON_SIZE)
+PERFCOUNTER_ARRAY(cause_vector,         "cause vector", VMX_PERF_VECTOR_SIZE)
+
+PERFCOUNTER_CPU(seg_fixups,             "segmentation fixups")
+
+PERFCOUNTER_CPU(irqs,                   "#interrupts")
+PERFCOUNTER_CPU(ipis,                   "#IPIs")
+PERFCOUNTER_CPU(irq_time,               "cycles spent in irq handler")
+
+PERFCOUNTER_CPU(apic_timer,             "apic timer interrupts")
+PERFCOUNTER_CPU(ac_timer_max,           "ac_timer max error (ns)")
+PERFCOUNTER_CPU(sched_irq,              "sched: timer")
+PERFCOUNTER_CPU(sched_run,              "sched: runs through scheduler")
+PERFCOUNTER_CPU(sched_ctx,              "sched: context switches")
+
+PERFCOUNTER_CPU(domain_page_tlb_flush,  "domain page tlb flushes")
+PERFCOUNTER_CPU(need_flush_tlb_flush,   "PG_need_flush tlb flushes")
+
+PERFCOUNTER_CPU(calls_to_mmu_update,    "calls_to_mmu_update")
+PERFCOUNTER_CPU(num_page_updates,       "num_page_updates")
+PERFCOUNTER_CPU(calls_to_update_va,     "calls_to_update_va_map")
+PERFCOUNTER_CPU(page_faults,            "page faults")
+PERFCOUNTER_CPU(copy_user_faults,       "copy_user faults")
+
+PERFCOUNTER_CPU(shadow_fault_calls,     "calls to shadow_fault")
+PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present,
+                "sf bailed due to pde not present")
+PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present,
+                "sf bailed due to pte not present")
+PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping,
+                "sf bailed due to a ro mapping")
+PERFCOUNTER_CPU(shadow_fault_fixed,     "sf fixed the pgfault")
+PERFCOUNTER_CPU(write_fault_bail,       "sf bailed due to write_fault")
+PERFCOUNTER_CPU(read_fault_bail,        "sf bailed due to read_fault")
+
+PERFCOUNTER_CPU(map_domain_page_count,  "map_domain_page count")
+PERFCOUNTER_CPU(ptwr_emulations,        "writable pt emulations")
+
+PERFCOUNTER_CPU(shadow_l2_table_count,  "shadow_l2_table count")
+PERFCOUNTER_CPU(shadow_l1_table_count,  "shadow_l1_table count")
+PERFCOUNTER_CPU(unshadow_table_count,   "unshadow_table count")
+PERFCOUNTER_CPU(shadow_fixup_count,     "shadow_fixup count")
+PERFCOUNTER_CPU(shadow_update_va_fail1, "shadow_update_va_fail1")
+PERFCOUNTER_CPU(shadow_update_va_fail2, "shadow_update_va_fail2")
 
 /* STATUS counters do not reset when 'P' is hit */
-PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
-PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
-PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
-PERFSTATUS( snapshot_pages,  "current # fshadow snapshot pages" )
-PERFSTATUS( writable_pte_predictions, "# writable pte predictions")
-PERFSTATUS( free_l1_pages,   "current # free shadow L1 pages" )
-
-PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
-PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
-
-PERFCOUNTER_CPU( shadow_hl2_table_count,   "shadow_hl2_table count" )
-PERFCOUNTER_CPU( shadow_set_l1e_force_map, "shadow_set_l1e forced to map l1" )
-PERFCOUNTER_CPU( shadow_set_l1e_unlinked,  "shadow_set_l1e found unlinked l1" )
-PERFCOUNTER_CPU( shadow_set_l1e_fail,      "shadow_set_l1e failed (no sl1)" )
-PERFCOUNTER_CPU( shadow_invlpg_faults,     "shadow_invlpg's get_user faulted")
-PERFCOUNTER_CPU( unshadow_l2_count,        "unpinned L2 count")
+PERFSTATUS(shadow_l2_pages,             "current # shadow L2 pages")
+PERFSTATUS(shadow_l1_pages,             "current # shadow L1 pages")
+PERFSTATUS(hl2_table_pages,             "current # hl2 pages")
+PERFSTATUS(snapshot_pages,              "current # fshadow snapshot pages")
+PERFSTATUS(writable_pte_predictions,    "# writable pte predictions")
+PERFSTATUS(free_l1_pages,               "current # free shadow L1 pages")
+
+PERFCOUNTER_CPU(check_pagetable,        "calls to check_pagetable")
+PERFCOUNTER_CPU(check_all_pagetables,   "calls to check_all_pagetables")
+
+PERFCOUNTER_CPU(shadow_hl2_table_count, "shadow_hl2_table count")
+PERFCOUNTER_CPU(shadow_set_l1e_force_map, "shadow_set_l1e forced to map l1")
+PERFCOUNTER_CPU(shadow_set_l1e_unlinked, "shadow_set_l1e found unlinked l1")
+PERFCOUNTER_CPU(shadow_set_l1e_fail,    "shadow_set_l1e failed (no sl1)")
+PERFCOUNTER_CPU(shadow_invlpg_faults,   "shadow_invlpg's get_user faulted")
+PERFCOUNTER_CPU(unshadow_l2_count,      "unpinned L2 count")
 
 PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
-PERFCOUNTER_CPU(shadow_status_calls,    "calls to ___shadow_status" )
-PERFCOUNTER_CPU(shadow_status_miss,     "missed shadow cache" )
-PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket" )
-PERFCOUNTER_CPU(shadow_max_type,        "calls to shadow_max_type" )
-
-PERFCOUNTER_CPU(shadow_sync_all,                   "calls to shadow_sync_all")
-PERFCOUNTER_CPU(shadow_sync_va,                    "calls to shadow_sync_va")
-PERFCOUNTER_CPU(resync_l1,                         "resync L1 page")
-PERFCOUNTER_CPU(resync_l2,                         "resync L2 page")
-PERFCOUNTER_CPU(resync_hl2,                        "resync HL2 page")
-PERFCOUNTER_CPU(shadow_make_snapshot,              "snapshots created")
-PERFCOUNTER_CPU(shadow_mark_mfn_out_of_sync_calls, "calls to shadow_mk_out_of_sync")
-PERFCOUNTER_CPU(shadow_out_of_sync_calls,          "calls to shadow_out_of_sync")
-PERFCOUNTER_CPU(snapshot_entry_matches_calls,      "calls to ss_entry_matches")
-PERFCOUNTER_CPU(snapshot_entry_matches_true,       "ss_entry_matches returns true")
-
-PERFCOUNTER_CPU(validate_pte_calls,                "calls to validate_pte_change")
-PERFCOUNTER_CPU(validate_pte_changes1,             "validate_pte makes changes1")
-PERFCOUNTER_CPU(validate_pte_changes2,             "validate_pte makes changes2")
-PERFCOUNTER_CPU(validate_pte_changes3,             "validate_pte makes changes3")
-PERFCOUNTER_CPU(validate_pte_changes4,             "validate_pte makes changes4")
-PERFCOUNTER_CPU(validate_pde_calls,                "calls to validate_pde_change")
-PERFCOUNTER_CPU(validate_pde_changes,              "validate_pde makes changes")
-PERFCOUNTER_CPU(shadow_get_page_fail,   "shadow_get_page_from_l1e fails" )
-PERFCOUNTER_CPU(validate_hl2e_calls,               "calls to validate_hl2e_change")
-PERFCOUNTER_CPU(validate_hl2e_changes,             "validate_hl2e makes changes")
-PERFCOUNTER_CPU(exception_fixed,                   "pre-exception fixed")
-PERFCOUNTER_CPU(gpfn_to_mfn_foreign,               "calls to gpfn_to_mfn_foreign")
-PERFCOUNTER_CPU(remove_all_access,                 "calls to remove_all_access")
-PERFCOUNTER_CPU(remove_write_access,               "calls to remove_write_access")
-PERFCOUNTER_CPU(remove_write_access_easy,          "easy outs of remove_write_access")
-PERFCOUNTER_CPU(remove_write_no_work,              "no work in remove_write_access")
-PERFCOUNTER_CPU(remove_write_not_writable,         "remove_write non-writable page")
-PERFCOUNTER_CPU(remove_write_fast_exit,            "remove_write hit predicted entry")
-PERFCOUNTER_CPU(remove_write_predicted,            "remove_write predict hit&exit")
-PERFCOUNTER_CPU(remove_write_bad_prediction,       "remove_write bad prediction")
-PERFCOUNTER_CPU(update_hl2e_invlpg,                "update_hl2e calls invlpg")
+PERFCOUNTER_CPU(shadow_status_calls,    "calls to ___shadow_status")
+PERFCOUNTER_CPU(shadow_status_miss,     "missed shadow cache")
+PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket")
+PERFCOUNTER_CPU(shadow_max_type,        "calls to shadow_max_type")
+
+PERFCOUNTER_CPU(shadow_sync_all,        "calls to shadow_sync_all")
+PERFCOUNTER_CPU(shadow_sync_va,         "calls to shadow_sync_va")
+PERFCOUNTER_CPU(resync_l1,              "resync L1 page")
+PERFCOUNTER_CPU(resync_l2,              "resync L2 page")
+PERFCOUNTER_CPU(resync_hl2,             "resync HL2 page")
+PERFCOUNTER_CPU(shadow_make_snapshot,   "snapshots created")
+PERFCOUNTER_CPU(shadow_mark_mfn_out_of_sync_calls,
+                "calls to shadow_mk_out_of_sync")
+PERFCOUNTER_CPU(shadow_out_of_sync_calls, "calls to shadow_out_of_sync")
+PERFCOUNTER_CPU(snapshot_entry_matches_calls, "calls to ss_entry_matches")
+PERFCOUNTER_CPU(snapshot_entry_matches_true, "ss_entry_matches returns true")
+
+PERFCOUNTER_CPU(validate_pte_calls,     "calls to validate_pte_change")
+PERFCOUNTER_CPU(validate_pte_changes1,  "validate_pte makes changes1")
+PERFCOUNTER_CPU(validate_pte_changes2,  "validate_pte makes changes2")
+PERFCOUNTER_CPU(validate_pte_changes3,  "validate_pte makes changes3")
+PERFCOUNTER_CPU(validate_pte_changes4,  "validate_pte makes changes4")
+PERFCOUNTER_CPU(validate_pde_calls,     "calls to validate_pde_change")
+PERFCOUNTER_CPU(validate_pde_changes,   "validate_pde makes changes")
+PERFCOUNTER_CPU(shadow_get_page_fail,   "shadow_get_page_from_l1e fails")
+PERFCOUNTER_CPU(validate_hl2e_calls,    "calls to validate_hl2e_change")
+PERFCOUNTER_CPU(validate_hl2e_changes,  "validate_hl2e makes changes")
+PERFCOUNTER_CPU(exception_fixed,        "pre-exception fixed")
+PERFCOUNTER_CPU(gpfn_to_mfn_foreign,    "calls to gpfn_to_mfn_foreign")
+PERFCOUNTER_CPU(remove_all_access,      "calls to remove_all_access")
+PERFCOUNTER_CPU(remove_write_access,    "calls to remove_write_access")
+PERFCOUNTER_CPU(remove_write_access_easy, "easy outs of remove_write_access")
+PERFCOUNTER_CPU(remove_write_no_work,   "no work in remove_write_access")
+PERFCOUNTER_CPU(remove_write_not_writable, "remove_write non-writable page")
+PERFCOUNTER_CPU(remove_write_fast_exit, "remove_write hit predicted entry")
+PERFCOUNTER_CPU(remove_write_predicted, "remove_write predict hit&exit")
+PERFCOUNTER_CPU(remove_write_bad_prediction, "remove_write bad prediction")
+PERFCOUNTER_CPU(update_hl2e_invlpg,     "update_hl2e calls invlpg")
+
+#endif /* __XEN_PERFC_DEFN_H__ */
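
(Not part of the patch: the file above contains only PERFCOUNTER_ARRAY/PERFCOUNTER_CPU/PERFSTATUS invocations with no macro definitions, so it is consumed X-macro style, with the including code supplying the macro bodies. The self-contained sketch below shows that style of use with made-up macro bodies; it is illustrative only, not Xen's actual <xen/perfc.h> implementation.)

/* Illustrative X-macro consumer, compilable on its own as hosted C. */
#include <stdio.h>

/* First expansion: turn each declaration into an enum constant. */
#define PERFCOUNTER_CPU(var, name)  PERFC_ ## var,
enum {
    PERFCOUNTER_CPU(page_faults, "page faults")
    PERFCOUNTER_CPU(seg_fixups,  "segmentation fixups")
    NR_PERFCTRS
};
#undef PERFCOUNTER_CPU

/* Second expansion: a parallel table of human-readable names. */
#define PERFCOUNTER_CPU(var, name)  [PERFC_ ## var] = name,
static const char *perfc_name[NR_PERFCTRS] = {
    PERFCOUNTER_CPU(page_faults, "page faults")
    PERFCOUNTER_CPU(seg_fixups,  "segmentation fixups")
};
#undef PERFCOUNTER_CPU

static unsigned long perfc[NR_PERFCTRS];

int main(void)
{
    /* A counter bump, normally hidden behind a helper macro. */
    perfc[PERFC_page_faults]++;
    printf("%s = %lu\n", perfc_name[PERFC_page_faults],
           perfc[PERFC_page_faults]);
    return 0;
}

In real use the invocations live only in the definitions header and are pulled in with #include at each expansion point; they are repeated inline here just to keep the sketch self-contained.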